import os
import utils
import imageutils
import adv_laneline_detection
import matplotlib.image as mpimg
from importlib import reload
import matplotlib.pyplot as plt
import numpy as np
import cv2
import kerasmodel
import datasetclasses
from PIL import Image
reload(utils)
reload(imageutils)
reload(adv_laneline_detection)
# load in Chessboard Calibration images
directory = "camera_cal/"
calibration_images = utils.Load_images_for_directory(directory)
utils.show_images(calibration_images, image_name='all_camera_calibration_images', cols=3)
# Optional cached path: load previously detected calibration points from a
# pickle instead of re-running corner detection.
# calibration_points = utils.load_calibration_points_from_pickle()
# if utils.CALIBRATION_PICKLE_FILE in os.listdir('.') and len(calibration_points) > 1:
# # Load calibration images from pickle file
# # calibration_points = utils.load_calibration_points_from_pickle()
# object_points = calibration_points['object_points']
# image_points = calibration_points['image_points']
# plt.imshow(mpimg.imread('output_images/all_calibration_corner_images.png'))
# else :
# Detect chessboard corners in every calibration image: returns the 3D
# object points, their 2D image projections, and the annotated images.
images_object_points, images_points, corner_images = adv_laneline_detection.get_images_points(calibration_images)
utils.show_images(corner_images, image_name='all_calibration_corner_images', cols=3)
# Save Calibration points to pickle file
# utils.save_calibration_points_to_pickle(object_points, image_points)
# (width, height) of a calibration frame -- shape is (h, w, c), so take the
# first two entries reversed.
image_shape = calibration_images[0].shape[1::-1]
ret, mtx, dist = adv_laneline_detection.calibrate_camera(images_object_points, images_points, image_shape)
if ret:
    # Undistort every calibration image with the recovered camera matrix
    # (mtx) and distortion coefficients (dist).
    undistorted_images = [adv_laneline_detection.undistort_image(img, mtx, dist) for img in calibration_images]
    # undistorted_images = adv_laneline_detection.undistort_images(object_points,
    #                                                              image_points,
    #                                                              calibration_images)
    utils.show_images(undistorted_images, image_name='all_undistorted_calibration_images', cols=3)
# Side-by-side comparison of the first two frames before/after undistortion.
original_undistorted = [calibration_images[0], undistorted_images[0],
                        calibration_images[1], undistorted_images[1]]
utils.show_images(original_undistorted, ['Original', 'Undistorted', 'Original', 'Undistorted'], cols=2)
Here we see 2 images with their original distorted and undistorted variants. We can clearly identify the fisheye distortion in the first image and the straighter undistorted result. The second image is another story: the distortion is not as obvious as in the first image, but we can still see the difference when comparing the two side by side.
import pickle
# Persist the camera matrix and distortion coefficients for later reuse.
calibration_values = { "mtx": mtx, "dist": dist }
pickle.dump( calibration_values, open( "calibration_values.p", "wb" ) )
# Read in the test images
dir = "test_images/"
project_test_images = [mpimg.imread(dir + image_name) for image_name in os.listdir(dir)]
utils.show_images(project_test_images, image_name='all_project_test_images', cols=2)
# Keep a hand-picked subset of frames for the threshold experiments below.
project_test_images = [project_test_images[0],
                       project_test_images[2],
                       project_test_images[3],
                       project_test_images[6]]
project_undistorted_images = [adv_laneline_detection.undistort_image(img, mtx, dist) for img in project_test_images]
# Interleave original/undistorted pairs for a side-by-side comparison plot.
original_undistorted, titles = [], []
for original_img, undistorted_img in zip(project_test_images, project_undistorted_images):
    original_undistorted.append(original_img)
    titles.append('Original')
    original_undistorted.append(undistorted_img)
    titles.append('Undistorted')
utils.show_images(original_undistorted,
                  titles,
                  image_name='project_original_undistorted_images', cols=2)
im = adv_laneline_detection.undistort_image(project_test_images[0], mtx, dist)
plt.imshow(im)
# NOTE(review): `im` was read with matplotlib (RGB) but cv2.imwrite expects
# BGR, so the saved file will have red/blue channels swapped -- confirm
# whether that is intended.
cv2.imwrite('messigray.png',im)
Here we explore the benefits we can get from using a different color space
# Threshold the red channel into a binary mask: red responds strongly to
# white lane paint.
R_channel = project_undistorted_images[0][:, :, 0]
threshold = (200, 250)
low, high = threshold
binary_red = ((R_channel > low) & (R_channel <= high)).astype(R_channel.dtype)
utils.show_images([project_undistorted_images[0], R_channel, binary_red],
                  ['Undistorted', 'Red channel', 'Binary red'],
                  image_name='undistorted_red_images', cols=2)
Here I'll use the HLS color space as I already did in a prior project and it yielded the best results.
# Convert to HLS and threshold the S (saturation) channel, which holds up
# better than RGB under changing lighting conditions.
hls_image = imageutils.convert_to_hsl(project_undistorted_images[0])
S_channel = hls_image[:, :, 2]
threshold = (170, 255)
binary_S = np.zeros_like(S_channel)
binary_S[(S_channel > threshold[0]) & (S_channel <= threshold[1])] = 1
# BUG FIX: the original displayed project_undistorted_images[-1] here even
# though hls_image / S_channel were computed from image [0]; show the
# matching source frame so the comparison is consistent.
utils.show_images([project_undistorted_images[0], hls_image, S_channel, binary_S],
                  ['Undistorted', 'HSL', 'S channel', 'Binary S channel'],
                  image_name='undistorted_hsl_images', cols=2)
Here I added the two extra color spaces as recommended by the reviewer and got pretty decent results.
# Additional color-space thresholding added after reviewer feedback
# (the image_name suggests LUV and LAB channels are used inside the helper
# -- confirm against adv_laneline_detection.other_color_thresholds).
binary = adv_laneline_detection.other_color_thresholds(project_undistorted_images[-1])
utils.show_images([project_undistorted_images[-1], binary],
                  ['Undistorted', 'Binary'],
                  image_name='luvandlab', cols=2)
As we saw, the R channel correctly identifies the white line and the S channel correctly identifies lines under different lighting conditions, so let's combine both results to get a robust representation of our lane lines.
# OR the two masks together: red catches white paint, S catches lines under
# varied lighting.
combined_color_binary = ((binary_red == 1) | (binary_S == 1)).astype(binary_red.dtype)
utils.show_images([project_undistorted_images[0], binary_red, binary_S, combined_color_binary],
                  ['Undistorted', 'Red binary', 'S binary', 'Combined binary'],
                  image_name='undistorted_combined_color_images', cols=2)
Let's take the third image as an example.
# Absolute Sobel gradient thresholds along each axis (kernel size 3).
abs_threshold = (20, 100)
gray_image = imageutils.convert_to_gray(project_undistorted_images[0])
sobelx = adv_laneline_detection.abs_sobel_thresh(gray_image, 'x', 3, abs_threshold)
sobely = adv_laneline_detection.abs_sobel_thresh(gray_image, 'y', 3, abs_threshold)
utils.show_images([project_undistorted_images[0], gray_image, sobelx, sobely],
                  ['Undistorted', 'Gray', 'Sobel X', 'Sobel Y'],
                  image_name='undistorted_sobelxy_images', cols=2)
# Gradient-magnitude and gradient-direction thresholds (kernel size 17,
# direction bounds presumably in radians -- confirm against the helper).
mag_threshold = (40, 100)
dir_threshold = (1.0, 1.3)
sobel_mag = adv_laneline_detection.mag_sobel_thresh(gray_image, 17, mag_threshold)
sobel_dir = adv_laneline_detection.dir_sobel_thresh(gray_image, 17, dir_threshold)
utils.show_images([project_undistorted_images[0], gray_image, sobel_mag, sobel_dir],
                  ['Undistorted', 'Gray', 'Sobel Mag', 'Sobel Dir'],
                  image_name='undistorted_sobel_mag_dir_images', cols=2)
# NOTE(review): this displays image [-1] next to gradients computed from
# image [0] -- confirm which frame was intended.
utils.show_images([project_undistorted_images[-1], gray_image, sobelx, sobely, sobel_mag, sobel_dir],
                  ['Undistorted', 'Gray', 'Sobel X', 'Sobel Y','Sobel Mag', 'Sobel Dir'],
                  image_name='undistorted_all_sobel_images', cols=2)
# Combined absolute x/y Sobel threshold (gray_image is recomputed here but
# is identical to the value above).
abs_threshold = (20, 100)
gray_image = imageutils.convert_to_gray(project_undistorted_images[0])
combined_sobel_xy = adv_laneline_detection.combined_abs_sobelxy_thresh(gray_image, 3, abs_threshold)
utils.show_images([sobelx, sobely, combined_sobel_xy],
                  ['Sobelx', 'Sobely', 'Combined Sobel'],
                  image_name='combined_sobelxy_images', cols=2)
# Combined magnitude + direction threshold with tighter magnitude bounds.
mag_threshold = (70, 100)
dir_threshold = (0.9, 1.3)
combined_sobel_mag_dir = adv_laneline_detection.combined_sobel_mag_dir_thresh(gray_image, 17, mag_threshold, dir_threshold)
utils.show_images([sobel_mag, sobel_dir, combined_sobel_mag_dir],
                  ['Sobel Mag', 'Sobel Dir', 'Combined Sobel'],
                  image_name='combined_sobel_mag_dir_images', cols=2)
# Union of the two combined gradient binaries.
combined_sobel = np.zeros_like(gray_image)
combined_sobel[(combined_sobel_xy == 1) | (combined_sobel_mag_dir == 1) ] = 1
utils.show_images([combined_sobel_xy, combined_sobel_mag_dir, combined_sobel],
                  ['Combined SobelXY', 'Combined SobelMagDir', 'Combined Sobel'],
                  image_name='combined_sobel_imagess', cols=2)
Let's now get the best of both worlds and combine the result of the color thresholding with the one from the gradients, to try to get a robust representation of our lane lines under different conditions.
# Union of the color and gradient binaries; `stacked` visualizes each
# source in its own channel (green = color, blue = gradient).
combined_color_gradient = np.zeros_like(combined_sobel)
combined_color_gradient[(combined_color_binary == 1) | (combined_sobel == 1)] = 1
stacked = np.dstack((np.zeros_like(combined_color_gradient), combined_color_binary, combined_sobel)) * 255
utils.show_images([combined_color_binary, combined_sobel, stacked, combined_color_gradient],
                  ['Combined Color', 'Combined Sobel', 'Stacked Color & Sobel', 'Combined Color & Sobel'],
                  image_name='combined_color_sobel_images', cols=2)
Let's apply thresholding to all of our test images.
# Color Thresholds
red_thresh = (220, 250)
hls_thresh = (90, 255)
hls2_thresh = (170, 255)
# Gradient Thresholds
xy_threshold = (20, 100)
mag_threshold = (70, 100)
dir_threshold = (1.1, 1.3)
thresholded_images = []
# Full thresholding pipeline applied to every selected test frame:
# undistort -> color threshold -> gradient threshold -> union.
for img in project_test_images:
    # Undistort
    undistorted_image = adv_laneline_detection.undistort_image(img, mtx, dist)
    # Color Thresholding
    color_binary_threshold = adv_laneline_detection.combined_color_threshold(undistorted_image, red_thresh, hls2_thresh)
    # Gradient Thresholding (Sobel)
    sobel_binary_threshold = adv_laneline_detection.combined_sobel_thresh(undistorted_image,
                                                                          abs_kernel=3,
                                                                          mag_dir_kernel=17,
                                                                          abs_thresh=xy_threshold,
                                                                          mag_thresh=mag_threshold,
                                                                          dir_thresh=dir_threshold)
    # Stacked result: green = color contribution, blue = gradient contribution
    stacked = np.dstack((np.zeros_like(color_binary_threshold), color_binary_threshold, sobel_binary_threshold)) * 255
    combined_color_gradient = np.zeros_like(color_binary_threshold)
    combined_color_gradient[(color_binary_threshold == 1) | (sobel_binary_threshold == 1)] = 1
    utils.show_images([color_binary_threshold, sobel_binary_threshold, stacked, combined_color_gradient],
                      ['Combined Color', 'Combined Sobel', 'Stacked Color & Sobel', 'Combined Color & Sobel'],
                      cols=2)
    thresholded_images.append(combined_color_gradient)
# Earlier per-channel experiment kept for reference:
# img = project_test_images[-1]
# undistorted_image = adv_laneline_detection.undistort_image(img, mtx, dist)
# binary_red = adv_laneline_detection.red_color_threshold(undistorted_image, red_thresh)
# binary_S = adv_laneline_detection.hls_color_threshold(undistorted_image, hls_thresh)
# b_s2 = adv_laneline_detection.hls_color_threshold(undistorted_image, hls2_thresh)
# combined_color_binary = np.zeros_like(binary_red)
# combined_color_binary[(binary_red == 1) | (binary_S == 1) ] = 1
# utils.show_images([project_undistorted_images[3], binary_red, binary_S, b_s2],
#                   ['Undistorted', 'Red binary', 'S binary', 'Combined binary'],
#                   image_name='undistorted_combined_color_images', cols=2)
Finally, we'll be using a mask to keep only the area of the image that contains the lane lines and ignore other parts of the road scene like other cars, trees, mountains, etc. Here I'm trying out different src and dst points, to ultimately use these values for warping.
width, height = project_undistorted_images[0].shape[1], project_undistorted_images[0].shape[0]
# Region-of-interest polygon expressed as fractions of the frame size.
# Top-Left, Top-right, Bottom-right, Bottom-left
# NOTE(review): this first polygon is immediately overwritten by the second
# assignment below; it is kept only as an earlier experiment.
vertices = np.array([[(width * 0.10, height) # Bottom left
                      , (width * 0.40, height * 0.65) # Top left vertex, 65% of the image height
                      , (width * 0.60, height * 0.65) # Top right vertex
                      , (width * 0.95, height)]] # Bottom right
                    , dtype=np.int32)
vertices = np.array([[(width * 0.10, height * 0.95) # Bottom left
                      , (width * 0.45, height * 0.63) # Top left vertex, 63% of the image height
                      , (width * 0.56, height * 0.63) # Top right vertex
                      , (width * 0.94, height * 0.95)]] # Bottom right
                    , dtype=np.int32)
masked_images = [adv_laneline_detection.region_of_interest(img, vertices) for img in project_undistorted_images]
# Interleave undistorted/masked pairs for display.
undistorted_masked, titles = [], []
for undistorted_img, masked_image in zip(project_undistorted_images, masked_images):
    undistorted_masked.append(undistorted_img)
    titles.append('Undistorted')
    undistorted_masked.append(masked_image)
    titles.append('Masked')
# NOTE(review): image_name reuses 'project_original_undistorted_images',
# overwriting the figure saved earlier under the same name -- confirm.
utils.show_images(undistorted_masked,
                  titles,
                  image_name='project_original_undistorted_images', cols=2)
Let's now first apply camera calibration to our real-world images, and then apply a perspective transform.
image_offset = 10
# Destination rectangle for the perspective transform: nearly the full
# frame, inset by a small horizontal offset.
dest = np.float32([[image_offset, 0], # Top left
                   [image_offset, height], # Bottom left
                   [width - image_offset, height], # Bottom right
                   [width - image_offset , 0]]) # Top right
# Source trapezoid on the road, as fractions of the frame size.
src = np.float32([[width * 0.45, height * 0.63] # Top left vertex, 63% of the image height
                  , [width * 0.10, height * 0.95] # Bottom left
                  , [width * 0.94, height * 0.95] # Bottom right
                  , [width * 0.56, height * 0.63]]) # Top right vertex
undistorted_warped, titles = [], []
warped_images = []
M, Minv = None, None
# Warp each thresholded binary to a top-down ("bird's-eye") view; M/Minv
# keep the forward and inverse transform of the last frame.
for undistorted_image in thresholded_images:#project_undistorted_images:#
    titles.append('Undistorted')
    undistorted_warped.append(undistorted_image)
    warped_image, M, Minv = adv_laneline_detection.warp_and_transform_image(undistorted_image, src, dest)
    undistorted_warped.append(warped_image)
    titles.append('Warped')
    warped_images.append(warped_image)
# NOTE(review): this appends a reference example, so warped_images now has
# one more entry than project_test_images; the zip below silently drops it.
warped_images.append(mpimg.imread('./images/warped_example.jpg'))
utils.show_images(undistorted_warped,
                  titles,
                  image_name='project_undistorted_warped_images', cols=2)
# Smooth Threshold result
warped_images = [adv_laneline_detection.gaussian_blur(img, 11) for img in warped_images]
# print(len(warped_images))
# g = adv_laneline_detection.gaussian_blur(warped_images[1], 11)
# p = warped_images[1]
# utils.show_images([p, g],
#                   titles,
#                   image_name='project_udfgndistorted_warped_images', cols=2)
# window settings
window_width = 100
# Break the image into vertical layers for the sliding-window search.
# NOTE(review): 720 / 6 gives 6 layers of 120 px (the original comment said
# 9 layers), and the value is a float -- confirm the helpers accept that.
window_height = warped_images[0].shape[0] / 6
print(window_height)
margin = 100 # How much to slide left and right for searching
left_fits, right_fits = [], []
for warped, original_image in zip(warped_images, project_test_images):
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Get lanes centroids (Windows)
    window_centroids = adv_laneline_detection.find_window_centroids(warped,
                                                                    window_width,
                                                                    window_height,
                                                                    margin)
    # If we found any window centers
    # if len(window_centroids) > 0:
    # print('Yay')
    output, left_indices, right_indices = adv_laneline_detection.draw_lines_windows(warped,
                                                                                    window_width,
                                                                                    window_height,
                                                                                    nonzerox,
                                                                                    nonzeroy,
                                                                                    window_centroids)
    # Display the final results
    plt.imshow(output)
    plt.title('window fitting results')
    # # If no window centers found, just display orginal road image
    # else:
    # output = np.array(cv2.merge((warped,warped,warped)),np.uint8)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_indices]
    lefty = nonzeroy[left_indices]
    rightx = nonzerox[right_indices]
    righty = nonzeroy[right_indices]
    # Get poly fits for the lane lines
    left_fit, right_fit = adv_laneline_detection.polyfit_lines(leftx, lefty, rightx, righty)
    # Color lane lines
    # making the original road pixels 3 color channels
    out_img = np.dstack((warped, warped, warped)) * 255
    out_img[nonzeroy[left_indices], nonzerox[left_indices]] = [255, 0, 0]
    out_img[nonzeroy[right_indices], nonzerox[right_indices]] = [0, 0, 255]
    # Draw polyfit and lane lines
    ploty, left_fitx, right_fitx = adv_laneline_detection.compute_polyfit(left_fit,
                                                                          right_fit,
                                                                          out_img)
    plt.plot(left_fitx, ploty, color='yellow')
    plt.plot(right_fitx, ploty, color='yellow')
    plt.show()
    # Get curvature
    lane_curvature = adv_laneline_detection.get_curvature(lefty, leftx, righty, rightx)
    print('Average curvature is {}'.format(lane_curvature))
    # Get car position
    # NOTE(review): `img` is a stale loop variable from an earlier cell (it
    # is never assigned in this loop) -- presumably this should be
    # original_image; confirm.
    car_position = adv_laneline_detection.get_vehicle_position(img, left_fitx, right_fitx)
    print('Car position of center is {}'.format(car_position))
    # Draw Drivable area
    result = adv_laneline_detection.draw_drivable_area(warped,
                                                       original_image,
                                                       ploty,
                                                       left_fitx,
                                                       right_fitx,
                                                       Minv)
    plt.imshow(result)
    plt.show()
    utils.show_images([output, result], ['Warped Poly fitted', 'Projected Drivable area'], image_name='1', cols=2)
# Get curvature (recomputed for the last processed frame)
lane_curvature = adv_laneline_detection.get_curvature(lefty, leftx, righty, rightx)
print('Average curvature is {}'.format(lane_curvature))
utils.show_images([result], ['Projected Drivable area'], image_name='drawn_drivable_area', cols=1)
Second round of training the model. Uncomment if you want to train the model.
# training_dir = 'seg_train/train/'
# labels_dir = 'seg_train/new_labels/'
# training_images_paths = [(training_dir + image_name) for image_name in os.listdir(training_dir)]
# label_images_paths = [(labels_dir + image_name) for image_name in os.listdir(labels_dir)]
# dataset = datasetclasses.Dataset(training_images_paths, label_images_paths)
# print('Training on {} images'.format(dataset.train.len))
# print('Validating on {} images'.format(dataset.valid.len))
# print(training_images_paths[0])
# print(label_images_paths[0])
# # utils.show_images(label_images)
# BATCHSIZE = 8
# print('Training generator')
# train_generator, train_steps_per_epoch = kerasmodel.get_data_generator_and_steps_per_epoch(dataset.train,
# BATCHSIZE)
# print('Validation generator')
# validation_generator, validation_steps_per_epoch = kerasmodel.get_data_generator_and_steps_per_epoch(dataset.valid,
# BATCHSIZE,
# validation=True)
# print('Training steps per epoch {}'.format(train_steps_per_epoch))
# print('Validation steps per epoch {}'.format(validation_steps_per_epoch))
# model_file = 'model_berkely_drivable_and_small_2.h5'
# k_model = kerasmodel.KerasModel(model_file=model_file,
# load=True)
# EPOCHS = 20
# # k_model.model.summary()
# # Training the KerasModel model and getting the metrics
# model_history = k_model.train_model_with_generator(train_generator,
# train_steps_per_epoch,
# EPOCHS,
# validation_generator,
# validation_steps_per_epoch,
# save_model_filepath=model_file)
# # Plotting the model Loss
# utils.plot_loss(model_history=model_history)
# Load two sample frames and run the trained segmentation model on them.
image_to_predict = np.array(Image.open('test_images/straight_lines1.jpg'))
challenge_image_to_predict = np.array(Image.open('challenge_test_images/challenge1.jpg'))
model_file = 'model_berkely_drivable_and_small_2.h5'
k_model = kerasmodel.KerasModel(model_file=model_file,
                                load=True)
lane_image = k_model.predict(image_to_predict)
challenge_lane_image = k_model.predict(challenge_image_to_predict)
# BUG FIX: the original blended 'new_copy', a name that is never defined in
# this file (NameError at runtime); overlay the predicted lane mask on the
# frame it was predicted from instead.
img = cv2.addWeighted(image_to_predict, 1, lane_image.astype('uint8')*255, 0.7, 0)
plt.imshow(img)
x = np.array([])
x.size
print(lane_image.shape)
utils.show_images([image_to_predict, lane_image, challenge_image_to_predict, challenge_lane_image], image_name='predictions', cols=2)
# NOTE(review): exact duplicate of the cell above (notebook artifact).
print(lane_image.shape)
utils.show_images([image_to_predict, lane_image, challenge_image_to_predict, challenge_lane_image], image_name='predictions', cols=2)
# NOTE(review): `x` is still an empty array at this point; the imshow /
# process_image calls below only make sense if the notebook cells were run
# out of order with `x` already holding a frame -- confirm.
plt.imshow(x)
plt.show()
lane_line_finder = adv_laneline_detection.LaneLineFinder(mtx, dist, k_model)
out = lane_line_finder.process_image(x)
plt.imshow(out)
plt.show()
plt.imshow(x)
plt.show()
# Same experiment with the pure-CV finder (no segmentation model).
lane_line_finder = adv_laneline_detection.LaneLineFinder(mtx, dist)
out = lane_line_finder.process_image(x)
plt.imshow(out)
plt.show()
x = project_undistorted_images[3]
plt.imshow(x)
plt.show()
# Run the Keras model manually on one frame and post-process the result.
preprocessed_image = kerasmodel.preprocess_image(x)
plt.imshow(preprocessed_image)
print(preprocessed_image.shape)
plt.show()
prediction = k_model.model.predict(preprocessed_image[None, :, :, :])
# NOTE(review): process_prediction is not defined in this file --
# presumably kerasmodel.process_prediction; confirm.
true_value = process_prediction(prediction[0], preprocessed_image)
plt.imshow(true_value[:, :, 0])
plt.show()
plt.imshow(cv2.resize(true_value, (1280, 720)))
plt.show()
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# Pick up any edits to the project modules without restarting the kernel.
reload(utils)
reload(imageutils)
reload(adv_laneline_detection)
reload(kerasmodel)
model_file = 'model_berkely_drivable_and_small_2.h5'
k_model = kerasmodel.KerasModel(model_file=model_file,
                                load=True)
# Segmentation-based pipeline on the main project video.
lane_line_finder = adv_laneline_detection.LaneLineFinder(mtx, dist, k_model)
project_output = 'videos/project_video_SEG.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("project_video.mp4")
white_clip = clip1.fl_image(lane_line_finder.process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(project_output, audio=False)
#Close the reader properly
clip1.reader.close()
clip1.audio.reader.close_proc()
# Segmentation-based pipeline on the challenge video (fresh finder so no
# lane state carries over from the previous clip).
lane_line_finder = adv_laneline_detection.LaneLineFinder(mtx, dist, k_model)
challenge_output = 'videos/challenge_video_SEG.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("challenge_video.mp4")
white_clip = clip1.fl_image(lane_line_finder.process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(challenge_output, audio=False)
#Close the reader properly
clip1.reader.close()
clip1.audio.reader.close_proc()
lane_line_finderlane_lin = adv_laneline_detection.LaneLineFinder(mtx, dist, k_model)
harder_challenge_output = 'videos/harder_challenge_video_SEG.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("harder_challenge_video.mp4")
white_clip = clip1.fl_image(lane_line_finder.process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(harder_challenge_output, audio=False)
#Close the reader properly
clip1.reader.close()
clip1.audio.reader.close_proc()
And now we come to the moment of truth: the video. This will prove or disprove all of our work so far — so what are we waiting for? Let's do this.
# Pure-CV pipeline (no segmentation model) on the main project video.
lane_line_finder = adv_laneline_detection.LaneLineFinder(mtx, dist)#, k_model)
project_output = 'videos/project_video_CV.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("project_video.mp4")
white_clip = clip1.fl_image(lane_line_finder.process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(project_output, audio=False)
#Close the reader properly
clip1.reader.close()
clip1.audio.reader.close_proc()
# Pure-CV pipeline on the challenge video.
lane_line_finder = adv_laneline_detection.LaneLineFinder(mtx, dist)#, k_model)
challenge_output = 'videos/challenge_video_CV.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("challenge_video.mp4")
white_clip = clip1.fl_image(lane_line_finder.process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(challenge_output, audio=False)
#Close the reader properly
clip1.reader.close()
clip1.audio.reader.close_proc()
# Quick-iteration run on a trimmed copy of the project video; reload the
# project modules first to pick up code changes.
reload(utils)
reload(imageutils)
reload(adv_laneline_detection)
reload(kerasmodel)
lane_line_finder = adv_laneline_detection.LaneLineFinder(mtx, dist)#, k_model)
challenge_output = 'videos/project_video_trimmed.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("project_videoTrim.mp4")
white_clip = clip1.fl_image(lane_line_finder.process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(challenge_output, audio=False)
#Close the reader properly
clip1.reader.close()
clip1.audio.reader.close_proc()
# NOTE(review): second close on an already-closed reader -- redundant and
# possibly error-raising; confirm it can be removed.
clip1.reader.close()
clip1.audio.reader.close_proc()
# Run the CV-only pipeline on three problem frames, constructing a fresh
# finder per frame so no lane state leaks between them.
for frame_name in ('13.png', '14.png', '16.png'):
    lane_line_finder = adv_laneline_detection.LaneLineFinder(mtx, dist)
    img = np.array(Image.open(frame_name))
    plt.imshow(lane_line_finder.process_image(img))
lane_line_finderlane_lin = adv_laneline_detection.LaneLineFinder(mtx, dist)#, k_model)
harder_challenge_output = 'videos/harder_challenge_video_CV.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("harder_challenge_video.mp4")
white_clip = clip1.fl_image(lane_line_finder.process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(harder_challenge_output, audio=False)
#Close the reader properly
clip1.reader.close()
clip1.audio.reader.close_proc()
This script takes the different segmentations — the drivable-area label and the car labels — and combines them into one label image that is then saved for training.
# Paths for the Berkeley-style labels: per-pixel segmentation (seg),
# drivable-area color labels (drive), and the combined output directory.
seg = './bb/seg/'
drive = './bb/labels_drivable/'
new_labels = './bb/new_labels'
def preprocess_image_labels(img_name, label_image, seg_image):
    """Combine a drivable-area label image and a segmentation image into one
    3-channel one-hot label (ch0 = background, ch1 = car, ch2 = lane) and
    save it as <new_labels>/<img_name>.png.

    Returns cv2.imwrite's success flag.
    """
    LANE_LABEL = 255
    CAR_LABEL = 142
    labels_new = np.zeros_like(label_image[:, :, :])
    # Identify lane marking pixels (label is 255)
    # NOTE(review): this selects *any* nonzero value in channel 0, not just
    # LANE_LABEL (255) -- confirm the drivable label really is binary.
    lane_marking_pixels = (label_image[:, :, 0]).nonzero()
    labels_new[:, :, 2][lane_marking_pixels] = 1
    # Identify car pixels (label is 142)
    car_pixels = (seg_image[:, :, 2] == CAR_LABEL).nonzero()
    labels_new[:, :, 1][car_pixels] = 1
    # Find all other labels
    other_pixels = ((label_image[:, :, 0] != LANE_LABEL)
                    & (seg_image[:, :, 2] != CAR_LABEL))
    # Remove the labels by setting their pixels to 0 ~ None
    labels_new[:, :, 0][other_pixels] = 1
    img_name = '{}/{}.png'.format(new_labels ,img_name)
    # img = Image.fromarray(labels_new)
    return cv2.imwrite(img_name, labels_new)#cv2.resize(labels_new, (256, 256))
# Pair drivable labels with segmentation masks by directory order.
# NOTE(review): os.listdir order is arbitrary; this assumes both listings
# enumerate matching files in the same order -- confirm.
for drive_img, seg_img in zip(os.listdir(drive), os.listdir(seg)):
    image_name = drive_img.replace(drive, '').replace('_drivable_color.png', '')
    preprocess_image_labels(image_name, np.array(Image.open(os.path.join(drive, drive_img))),
                            np.array(Image.open(os.path.join(seg, seg_img))))
# Paths for the second (Cityscapes-style) label source and its output dir.
seg = './seg_train/masks/'
new_labels = './seg_train/new_labels'
def preprocess_image_labels(img_name, seg_image):
LANE_LABEL = 7
CAR_LABEL = 26
labels_new = np.zeros_like(seg_image[:, :, :])
# Identify lane marking pixels (label is 255)
lane_marking_pixels = (seg_image[:, :, 0] == LANE_LABEL).nonzero()
labels_new[:, :, 2][lane_marking_pixels] = 1
# Identify car pixels (label is 142)
car_pixels = (seg_image[:, :, 0] == CAR_LABEL).nonzero()
labels_new[:, :, 1][car_pixels] = 1
# Find all other labels
other_pixels = ((seg_image[:, :, 0] != LANE_LABEL)
& (seg_image[:, :, 2] != CAR_LABEL))
# Remove the labels by setting their pixels to 0 ~ None
labels_new[:, :, 0][other_pixels] = 1
img_name = '{}/{}'.format(new_labels ,img_name)
return cv2.imwrite(img_name, labels_new)#cv2.resize(labels_new, (256, 256))
# Build the combined label for every mask in `seg`.
for seg_img in os.listdir(seg):
    image_name = seg_img.replace('_mask', '')
    preprocess_image_labels(image_name,
                            np.array(Image.open(os.path.join(seg, seg_img))))
# Sanity check: visualize one generated label next to its training image.
# NOTE(review): per the label builder, channel 0 is background ('None') and
# channel 2 is the lane/drivable mask -- the titles below look swapped for
# channels 0 and 2; confirm.
i = np.array(Image.open('./seg_train/new_labels/29.png'))
img = np.array(Image.open('./seg_train/train/29.jpg'))
utils.show_images([img, i[:,:,0], i[:,:,1], i[:,:,2]],
                  ['Image', 'Drivable area', 'Cars', 'None'], image_name='final_labels', cols=4)
# Rotation experiment on a raw training frame.
seg_image = np.array(Image.open('./bb/train/035b1b38-07da7a0c.jpg'))
i = Image.fromarray(seg_image.astype('uint8'), 'RGB').rotate(-45)
plt.imshow(i)
plt.show()
plt.imshow(seg_image[:, :, :])
# Compare one pixel value before/after rotation.
print(seg_image[500, 1000, 0])
print(np.array(i)[500, 1000, 0])
plt.show()
plt.imshow(seg_image[:, :, 2])
plt.show()